home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
pcr
/
pcr4_4.lha
/
DIST
/
gc
/
GCalloc.c
< prev
next >
Wrap
C/C++ Source or Header
|
1992-02-25
|
9KB
|
311 lines
/* begincopyright
Copyright (c) 1988,1990 Xerox Corporation. All rights reserved.
Use and copying of this software and preparation of derivative works based
upon this software are permitted. Any distribution of this software or
derivative works must comply with all applicable United States export
control laws. This software is made available AS IS, and Xerox Corporation
makes no warranty about the software, its performance or its conformity to
any specification. Any person obtaining a copy of this software is requested
to send their name and post office or electronic mail address to:
PCR Coordinator
Xerox PARC
3333 Coyote Hill Rd.
Palo Alto, CA 94304
Parts of this software were derived from code bearing the copyright notice:
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
This material may be freely distributed, provided this notice is retained.
This material is provided as is, with no warranty expressed or implied.
Use at your own risk.
endcopyright */
/*
 * This file contains the functions:
 *	bool GC_new_hblk(n)
 *	bool GC_allocobj(sz)
 *	bool GC_allocaobj(sz)
 */
/*
* Boehm, December 14, 1990 2:15:29 pm PST
*/
# include <signal.h>
# include <sys/types.h>
# include <sys/times.h>
# include <sys/timeb.h>
# include "xr/GCPrivate.h"
/* Leaving these defined enables output to stderr. In order of */
/* increasing verbosity: */
#define DEBUG /* Verbose debugging output */
#undef DEBUG
#define DEBUG2 /* EXTREMELY verbose debugging output */
#undef DEBUG2
/*
* This is an attempt at a garbage collecting storage allocator
* that should run on most UNIX systems. The garbage
* collector is overly conservative in that it may fail to reclaim
* inaccessible storage. On the other hand, it does not assume
* any runtime tag information.
* We make the following assumptions:
* 1. We are running under something that looks like Berkeley UNIX,
* on one of the supported architectures.
* 2. For every accessible object, a pointer to it is stored
* somewhere where GC_mark_roots will find it.
*/
/*
* Separate free lists are maintained for different sized objects
* up to MAXOBJSZ.
* The lists (GC_)objfreelist[i] contain free objects of size i which may
* contain nested pointers. The lists aobjfreelist[i] contain free
* atomic objects, which may not contain nested pointers.
* The call allocobj(i) ensures that objfreelist[i] points to a non-empty
* free list.
* GC_allocobj may be called to allocate an object of (small) size i
* as follows:
*
* opp = &(GC_objfreelist[i]);
* if (*opp == (struct obj *)0) GC_allocobj(i);
* ptr = *opp;
* *opp = ptr->next;
*
* Note that this is very fast if the free list is non-empty; it should
* only involve the execution of 4 or 5 simple instructions.
* All composite objects on freelists are cleared, except for
* their first longword.
*/
/*
* The allocator uses allochblk to allocate large chunks of objects.
* These chunks all start on addresses which are multiples of
* HBLKSZ. All starting addresses are maintained on a contiguous
* list so that they can be traversed in the sweep phase of garbage collection.
* This makes it possible to check quickly whether an
* arbitrary address corresponds to an object administered by the
* allocator.
* We make the (probably false) claim that this can be interrupted
* by a signal with at most the loss of some chunk of memory.
*/
/* Declarations for fundamental data structures. These are grouped */
/* together, so that the collector can skip over them. */
/* This relies on some assumptions about the compiler that are not */
/* guaranteed valid, but ... */
char GC_copyright[] = "Copyright 1990 Xerox Corporation, 1988,1989 Hans-J. Boehm and Alan J. Demers";
/*
* Allocate a new heapblock for objects of size n.
* Add all of the heapblock's objects to the free list for objects
* of that size. A negative n requests atomic objects.
* Returns TRUE if successful, FALSE otherwise.
*/
/*
 * Allocate a new heap block for objects of size n and thread every object
 * in it onto the appropriate free list: objfreelist for composite objects,
 * aobjfreelist when n is negative (atomic objects, no nested pointers).
 * Returns TRUE if successful, FALSE otherwise.
 * Callers (GC_allocobj/GC_allocaobj) hold GC_allocate_ml around this call.
 */
bool GC_new_hblk(n)
long n;
{
    register word *p,           /* walks over successive object starts */
                  *r;           /* one object behind p: link target    */
    word *last_object;          /* points to last object in new hblk   */
    register struct hblk *h;    /* the new heap block                  */
    register long abs_sz;       /* |n|                                 */

    /* Allocate a new heap block */
    h = GC_allochblk(n);
    if (h == (struct hblk *)0) {
        return(FALSE);
    }

    /* Add it to hblklist */
    GC_add_hblklist(h);

    /* Keep track of the fact that we allocated it in this collection cycle */
    hb_last_reclaimed(h) = GC_gc_no;

    /* Add objects to free list */
    /* abs() takes an int; negate by hand so a long n is not truncated. */
    abs_sz = (n < 0) ? -n : n;
    p = &(h -> hb_body[abs_sz]);        /* second object in *h */
    r = &(h -> hb_body[0]);             /* One object behind p */
    last_object = ((word *)((char *)h + HBLKSIZE)) - abs_sz;
                                /* Last place for last object to start */

    /* make a list of all objects in *h with head as last object */
    while (p <= last_object) {
        /* current object's link points to last object */
        set_obj_link((struct obj *)p, r);
        r = p;
        p += abs_sz;
    }
    p -= abs_sz;                        /* p now points to last object */

    /*
     * put p (which is now head of list of objects in *h) as first
     * pointer in the appropriate free list for this size.
     */
    if (n < 0) {
        set_obj_link((struct obj *)(h -> hb_body), aobjfreelist[abs_sz]);
        aobjfreelist[abs_sz] = ((struct obj *)p);
    } else {
        set_obj_link((struct obj *)(h -> hb_body), objfreelist[abs_sz]);
        objfreelist[abs_sz] = ((struct obj *)p);
    }

    /* Try to create a map of the block layout for fast access by the marker. */
    GC_add_cache_entry(abs_sz);

#   ifdef DEBUG
        GC_vprintf("Allocated new heap block at address 0x%X\n",
                   h);
#   endif
    return(TRUE);
}
# ifdef VISUALIZE
/* Redisplay all marked objects in a block as allocated */
/*
 * Redisplay all marked objects in a block as allocated.
 * Steps through the block one object at a time; the
 * "|| word_no == start_word_no" clause forces at least one
 * iteration, so a single object larger than the remaining
 * block space is still displayed.
 * (Removed an unused local `i` that was declared but never read.)
 */
undisplay_marks(hbp)
register struct hblk *hbp;      /* ptr to current heap block */
{
    register int word_no;       /* Number of word in block */
    register int sz;            /* size of objects in current block */
    int start_word_no;

    sz = HB_SIZE(hbp);
    word_no = start_word_no = BYTES_TO_WORDS(HDR_BYTES);
    while (word_no + sz <= BYTES_TO_WORDS(HBLKSIZE)
           || word_no == start_word_no) {
        if (mark_bit(hbp, word_no)) {
            /* Live object: show it as allocated again. */
            displayAlloc(((word *)hbp) + word_no, sz);
        }
        word_no += sz;
    }
}
# endif
/*
* Try to ensure the composite object free list for sz is not empty.
* Caller is NOT expected to hold GC_allocate_ml.
* Returns FALSE if no space is available.
* Since we are called outside any monitor lock, caller should
* check the free list after we return.
*/
/*
 * Try to ensure the composite object free list for sz is not empty.
 * Caller is NOT expected to hold GC_allocate_ml; the monitor is
 * entered only around operations that mutate shared heap state.
 * Returns FALSE if no space is available.
 * Since we are called outside any monitor lock, caller should
 * check the free list after we return (another thread may have
 * emptied it again).
 */
bool GC_allocobj(sz)
long sz;
{
    register struct hblk ** rlp = &(reclaim_list[sz]);  /* blocks awaiting sweep for this size */
    register bool signalled_full = FALSE;   /* Already called GC_heap_full */
    register bool result;
#   ifdef VISUALIZE
        window_update();
#   endif
    for (;;) {
        /* Attempt to fill free list by sweeping blocks queued for reclaim. */
        while (objfreelist[sz] == ((struct obj *)0)
               && *rlp != (struct hblk *)0) {
            /* Safe even if *rlp changed in the interim */
            GC_reclaim_composite(rlp);
        }
        if (objfreelist[sz] != (struct obj *)0) {
            return(TRUE);
        }
        /* Free list still empty: try to carve a fresh heap block. */
        if (GC_hblkfreelist != 0) {
            XR_MonitorEntry(&GC_allocate_ml);
#           ifdef BLACK_LIST
            /* Skip the free block if it is black-listed, unless we have */
            /* enough blocks for this size anyway.                       */
            if (!is_black_listed(GC_hblkfreelist)
                || GC_sufficient_hb(sz)) {
#           endif
                result = GC_new_hblk(sz);
                XR_MonitorExit(&GC_allocate_ml);
                return(result);
#           ifdef BLACK_LIST
            } else {
                XR_MonitorExit(&GC_allocate_ml);
            }
#           endif
        }
        /* No usable free block.  If we already signalled heap-full once, */
        /* give up and make a last-ditch allocation attempt below.        */
        if (signalled_full) break;
        XR_MonitorEntry(&GC_allocate_ml);
        /* Either ask for a collection or grow the heap, depending on the */
        /* ratio of non-collectable bytes to total heap size.             */
        if (GC_DIV * GC_non_gc_bytes < GC_MULT * GC_heapsize
            || GC_fix_heap_size) {
            GC_heap_full();
            signalled_full = TRUE;
        } else {
            GC_expand_hp(NON_GC_HINCR, FALSE);
        }
        XR_MonitorExit(&GC_allocate_ml);
    }
    /* Last attempt: GC_heap_full was already signalled and did not help. */
    XR_MonitorEntry(&GC_allocate_ml);
    result = GC_new_hblk(sz);
    XR_MonitorExit(&GC_allocate_ml);
    return(result);
}
/*
* The same as above, but for atomic objects.
*/
/*
 * The same as GC_allocobj above, but for atomic objects
 * (no nested pointers): fills aobjfreelist[sz] via
 * GC_reclaim_atomic, and passes -sz to GC_new_hblk to
 * request an atomic block.
 */
bool GC_allocaobj(sz)
long sz;
{
    register struct hblk ** rlp = &(areclaim_list[sz]); /* atomic blocks awaiting sweep */
    register bool signalled_full = FALSE;   /* Already called GC_heap_full */
    register bool result;
#   ifdef VISUALIZE
        window_update();
#   endif
    for (;;) {
        /* Attempt to fill free list by sweeping queued atomic blocks. */
        while (aobjfreelist[sz] == ((struct obj *)0)
               && *rlp != (struct hblk *)0) {
            /* Safe even if *rlp changed in the interim */
            GC_reclaim_atomic(rlp);
        }
        if (aobjfreelist[sz] != (struct obj *)0) {
            return(TRUE);
        }
        /* Free list still empty: carve a fresh atomic heap block. */
        if (GC_hblkfreelist != 0) {
            XR_MonitorEntry(&GC_allocate_ml);
            result = GC_new_hblk(-sz);  /* negative size => atomic */
            XR_MonitorExit(&GC_allocate_ml);
            return(result);
        }
        if (signalled_full) break;
        XR_MonitorEntry(&GC_allocate_ml);
        /* NOTE(review): unlike GC_allocobj, this test omits            */
        /* "|| GC_fix_heap_size" -- possibly intentional, but confirm.  */
        if (GC_DIV * GC_non_gc_bytes < GC_MULT * GC_heapsize) {
            GC_heap_full();
            signalled_full = TRUE;
        } else {
            GC_expand_hp(NON_GC_HINCR, FALSE);
        }
        XR_MonitorExit(&GC_allocate_ml);
    }
    /* Last attempt after GC_heap_full failed to free anything. */
    XR_MonitorEntry(&GC_allocate_ml);
    result = GC_new_hblk(-sz);
    XR_MonitorExit(&GC_allocate_ml);
    return(result);
}